x86_emulate: On HVM MMIO emulation, cache the gva->pfn mapping for the
MMIO page. Speeds up Windows installation by about 20 percent.

author:    Keir Fraser <keir.fraser@citrix.com>
           Thu, 27 Mar 2008 10:52:54 +0000 (10:52 +0000)
committer: Keir Fraser <keir.fraser@citrix.com>
           Thu, 27 Mar 2008 10:52:54 +0000 (10:52 +0000)

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
xen/arch/x86/hvm/emulate.c
xen/arch/x86/hvm/io.c
xen/arch/x86/mm/shadow/multi.c
xen/include/asm-x86/hvm/io.h
xen/include/asm-x86/hvm/vcpu.h

index 4b0a38e6e9a9480f6e9229ceed81335280c00a23..4ab2cd0301ebb308355bf38a0ba4a3125f70ff64 100644 (file)
@@ -214,7 +214,9 @@ static int __hvmemul_read(
     enum hvm_access_type access_type,
     struct hvm_emulate_ctxt *hvmemul_ctxt)
 {
+    struct vcpu *curr = current;
     unsigned long addr;
+    paddr_t gpa;
     int rc;
 
     rc = hvmemul_virtual_to_linear(
@@ -224,6 +226,17 @@ static int __hvmemul_read(
 
     *val = 0;
 
+    if ( unlikely(curr->arch.hvm_vcpu.mmio_gva == (addr & PAGE_MASK)) &&
+         curr->arch.hvm_vcpu.mmio_gva )
+    {
+        unsigned int off = addr & (PAGE_SIZE - 1);
+        if ( access_type == hvm_access_insn_fetch )
+            return X86EMUL_UNHANDLEABLE;
+        gpa = (((paddr_t)curr->arch.hvm_vcpu.mmio_gpfn << PAGE_SHIFT) | off);
+        if ( (off + bytes) <= PAGE_SIZE )
+            return hvmemul_do_mmio(gpa, 1, bytes, 0, IOREQ_READ, 0, 0, val);
+    }
+
     rc = ((access_type == hvm_access_insn_fetch) ?
           hvm_fetch_from_guest_virt(val, addr, bytes) :
           hvm_copy_from_guest_virt(val, addr, bytes));
@@ -233,7 +246,6 @@ static int __hvmemul_read(
     if ( rc == HVMCOPY_bad_gfn_to_mfn )
     {
         unsigned long reps = 1;
-        paddr_t gpa;
 
         if ( access_type == hvm_access_insn_fetch )
             return X86EMUL_UNHANDLEABLE;
@@ -293,7 +305,9 @@ static int hvmemul_write(
 {
     struct hvm_emulate_ctxt *hvmemul_ctxt =
         container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
+    struct vcpu *curr = current;
     unsigned long addr;
+    paddr_t gpa;
     int rc;
 
     rc = hvmemul_virtual_to_linear(
@@ -301,6 +315,16 @@ static int hvmemul_write(
     if ( rc != X86EMUL_OKAY )
         return rc;
 
+    if ( unlikely(curr->arch.hvm_vcpu.mmio_gva == (addr & PAGE_MASK)) &&
+         curr->arch.hvm_vcpu.mmio_gva )
+    {
+        unsigned int off = addr & (PAGE_SIZE - 1);
+        gpa = (((paddr_t)curr->arch.hvm_vcpu.mmio_gpfn << PAGE_SHIFT) | off);
+        if ( (off + bytes) <= PAGE_SIZE )
+            return hvmemul_do_mmio(gpa, 1, bytes, val, IOREQ_WRITE,
+                                   0, 0, NULL);
+    }
+
     rc = hvm_copy_to_guest_virt(addr, &val, bytes);
     if ( rc == HVMCOPY_bad_gva_to_gfn )
         return X86EMUL_EXCEPTION;
@@ -308,7 +332,6 @@ static int hvmemul_write(
     if ( rc == HVMCOPY_bad_gfn_to_mfn )
     {
         unsigned long reps = 1;
-        paddr_t gpa;
 
         rc = hvmemul_linear_to_phys(
             addr, &gpa, bytes, &reps, hvm_access_write, hvmemul_ctxt);
index 041d2378880e4bf2aa239893b87bcd58ca4b3e0c..bc5f8acce33867290e7f2360f1eac6af95d3b96d 100644 (file)
@@ -183,7 +183,9 @@ int handle_mmio(void)
     rc = hvm_emulate_one(&ctxt);
 
     if ( curr->arch.hvm_vcpu.io_state == HVMIO_awaiting_completion )
-       curr->arch.hvm_vcpu.io_state = HVMIO_handle_mmio_awaiting_completion;
+        curr->arch.hvm_vcpu.io_state = HVMIO_handle_mmio_awaiting_completion;
+    else
+        curr->arch.hvm_vcpu.mmio_gva = 0;
 
     switch ( rc )
     {
@@ -210,6 +212,13 @@ int handle_mmio(void)
     return 1;
 }
 
+int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn)
+{
+    current->arch.hvm_vcpu.mmio_gva = gva & PAGE_MASK;
+    current->arch.hvm_vcpu.mmio_gpfn = gpfn;
+    return handle_mmio();
+}
+
 void hvm_io_assist(void)
 {
     struct vcpu *v = current;
index 4689d132c1432224261652ac0ea1bbab7ce48cf5..f92687c246571f8c2d2a83041498151e1a1d1525 100644 (file)
@@ -2881,7 +2881,8 @@ static int sh_page_fault(struct vcpu *v,
             perfc_incr(shadow_fault_fast_mmio);
             SHADOW_PRINTK("fast path mmio %#"PRIpaddr"\n", gpa);
             reset_early_unshadow(v);
-            return handle_mmio() ? EXCRET_fault_fixed : 0;
+            return (handle_mmio_with_translation(va, gpa >> PAGE_SHIFT)
+                    ? EXCRET_fault_fixed : 0);
         }
         else
         {
@@ -3199,7 +3200,8 @@ static int sh_page_fault(struct vcpu *v,
     shadow_audit_tables(v);
     reset_early_unshadow(v);
     shadow_unlock(d);
-    return handle_mmio() ? EXCRET_fault_fixed : 0;
+    return (handle_mmio_with_translation(va, gpa >> PAGE_SHIFT)
+            ? EXCRET_fault_fixed : 0);
 
  not_a_shadow_fault:
     sh_audit_gw(v, &gw);
index 8fd631d8cfadd85c63632172550c2556ccedecdc..535f909d5a281ce316ca41e0a98208cfc82ee0cb 100644 (file)
@@ -99,6 +99,7 @@ static inline int register_buffered_io_handler(
 void send_timeoffset_req(unsigned long timeoff);
 void send_invalidate_req(void);
 int handle_mmio(void);
+int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn);
 void hvm_interrupt_post(struct vcpu *v, int vector, int type);
 void hvm_io_assist(void);
 void hvm_dpci_eoi(struct domain *d, unsigned int guest_irq,
index 0c76551575a63680b8b5831f20f45fa63d0a42b6..d3281d20dccbbfad4ded69b2fc21a1f42fd153ab 100644 (file)
@@ -80,6 +80,15 @@ struct hvm_vcpu {
     /* I/O request in flight to device model. */
     enum hvm_io_state   io_state;
     unsigned long       io_data;
+
+    /*
+     * HVM emulation:
+     *  Virtual address @mmio_gva maps to MMIO physical frame @mmio_gpfn.
+     *  The latter is known to be an MMIO frame (not RAM).
+     *  This translation is only valid if @mmio_gva is non-zero.
+     */
+    unsigned long       mmio_gva;
+    unsigned long       mmio_gpfn;
 };
 
 #endif /* __ASM_X86_HVM_VCPU_H__ */